bitkeeper revision 1.1159.187.21 (41a85113PA1gYVqMumQ4r2S3y6POGw)
author kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Sat, 27 Nov 2004 10:04:03 +0000 (10:04 +0000)
committer kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Sat, 27 Nov 2004 10:04:03 +0000 (10:04 +0000)
Clean up and fix the 2.6 entry.S:
 1. Save and restore the event_mask in SAVE_ALL / restore_all.
 2. No need to keep reloading %esi all over the place as we can load it
    once on entry (e.g., SAVE_ALL) and thereafter it is callee-saved.
 3. No need for extra XEN_UNBLOCK_EVENTS() where native isn't doing a 'sti',
    even if the code looks broken -- it is okay to call schedule() with
    interrupts off, in which case it will reenable them itself.
 4. Fixed another KERNEL_DS -> USER_DS.
 5. Unmacroed the page fault handler.
 6. A bunch of other tiny fixes....

linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S
linux-2.6.9-xen-sparse/arch/xen/i386/kernel/traps.c

index 82be1d521dcaef6412ec99ccba5d4f14f06335d1..1d19d7986433da709e72fff7a400c65be19117b7 100644 (file)
@@ -64,6 +64,7 @@ ES            = 0x20
 ORIG_EAX       = 0x24
 EIP            = 0x28
 CS             = 0x2C
+EVENT_MASK     = 0x2E
 EFLAGS         = 0x30
 OLDESP         = 0x34
 OLDSS          = 0x38
@@ -81,17 +82,16 @@ VM_MASK             = 0x00020000
 
 #define XEN_BLOCK_EVENTS(reg)  movb $1,evtchn_upcall_mask(reg)
 #define XEN_UNBLOCK_EVENTS(reg)        movb $0,evtchn_upcall_mask(reg)
-#define XEN_TEST_PENDING(reg)  testb $0xFF,evtchn_upcall_pending(%reg)
+#define XEN_TEST_PENDING(reg)  testb $0xFF,evtchn_upcall_pending(reg)
 
 #ifdef CONFIG_PREEMPT
-#define preempt_stop           movl HYPERVISOR_shared_info,%esi        ; \
-                               XEN_BLOCK_EVENTS(%esi)
+#define preempt_stop           XEN_BLOCK_EVENTS(%esi)
 #else
 #define preempt_stop
 #define resume_kernel          restore_all
 #endif
 
-#define SAVE_ALL \
+#define SAVE_ALL_NO_EVENTMASK \
        cld; \
        pushl %es; \
        pushl %ds; \
@@ -104,7 +104,13 @@ VM_MASK            = 0x00020000
        pushl %ebx; \
        movl $(__USER_DS), %edx; \
        movl %edx, %ds; \
-       movl %edx, %es;
+       movl %edx, %es
+
+#define SAVE_ALL \
+       SAVE_ALL_NO_EVENTMASK; \
+       movl HYPERVISOR_shared_info, %esi; \
+       movb evtchn_upcall_mask(%esi), %dl; \
+       movb %dl, EVENT_MASK(%esp)
 
 #define RESTORE_INT_REGS \
        popl %ebx;      \
@@ -208,35 +214,30 @@ ret_from_intr:
        testl $(VM_MASK | 2), %eax
        jz resume_kernel                # returning to kernel or vm86-space
 ENTRY(resume_userspace)
-       movl HYPERVISOR_shared_info,%esi
-       XEN_BLOCK_EVENTS(%esi)          # make tests atomic
-                                       # make sure we don't miss an interrupt
+       XEN_BLOCK_EVENTS(%esi)          # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
-ret_syscall_tests:
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
                                        # int/exception return?
        jne work_pending
-       jmp restore_all_enable_events
+       jmp restore_all
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
-       movl HYPERVISOR_shared_info,%esi
        cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
-       jnz restore_all_enable_events
+       jnz restore_all
 need_resched:
        movl TI_flags(%ebp), %ecx       # need_resched set ?
        testb $_TIF_NEED_RESCHED, %cl
-       jz restore_all_enable_events
+       jz restore_all
        testl $IF_MASK,EFLAGS(%esp)     # interrupts off (exception path) ?
-       jz restore_all_enable_events
+       jz restore_all
        movl $PREEMPT_ACTIVE,TI_preempt_count(%ebp)
-       XEN_UNBLOCK_EVENTS(%esi)        # reenable event callbacks
+       XEN_UNBLOCK_EVENTS(%esi)
        call schedule
        movl $0,TI_preempt_count(%ebp)
-       movl HYPERVISOR_shared_info,%esi
-       XEN_BLOCK_EVENTS(%esi)          # make tests atomic
+       XEN_BLOCK_EVENTS(%esi)
        jmp need_resched
 #endif
 
@@ -269,11 +270,11 @@ sysenter_past_esp:
        pushl %eax
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
-       cmpl $(nr_syscalls), %eax
-       jae syscall_badsys
 
        testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
        jnz syscall_trace_entry
+       cmpl $(nr_syscalls), %eax
+       jae syscall_badsys
        call *sys_call_table(,%eax,4)
        movl %eax,EAX(%esp)
        cli
@@ -292,48 +293,43 @@ ENTRY(system_call)
        pushl %eax                      # save orig_eax
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
-       cmpl $(nr_syscalls), %eax
-       jae syscall_badsys
                                        # system call tracing in operation
        testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
        jnz syscall_trace_entry
+       cmpl $(nr_syscalls), %eax
+       jae syscall_badsys
 syscall_call:
        call *sys_call_table(,%eax,4)
        movl %eax,EAX(%esp)             # store the return value
 syscall_exit:
-       movl HYPERVISOR_shared_info,%esi
-       XEN_BLOCK_EVENTS(%esi)          # make tests atomic
-                                       # make sure we don't miss an interrupt
+       XEN_BLOCK_EVENTS(%esi)          # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        movl TI_flags(%ebp), %ecx
        testw $_TIF_ALLWORK_MASK, %cx   # current->work
        jne syscall_exit_work
-       jmp restore_all_enable_events
-
-       ALIGN
 restore_all:
+       movb EVENT_MASK(%esp), %al
+       notb %al                        # %al == ~saved_mask
+       andb evtchn_upcall_mask(%esi),%al
+       andb $1,%al                     # %al == mask & ~saved_mask
+       jnz restore_all_enable_events   #     != 0 => reenable event delivery
        RESTORE_ALL
 
        # perform work that needs to be done immediately before resumption
        ALIGN
 work_pending:
-       XEN_UNBLOCK_EVENTS(%esi)        # reenable event callbacks
        testb $_TIF_NEED_RESCHED, %cl
        jz work_notifysig
 work_resched:
        call schedule
-       movl HYPERVISOR_shared_info,%esi
-       XEN_BLOCK_EVENTS(%esi)          # make tests atomic
-                                       # make sure we don't miss an interrupt
+       XEN_BLOCK_EVENTS(%esi)          # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
                                        # than syscall tracing?
-       jz restore_all_enable_events
-       # XXXcl sti missing???
-       XEN_UNBLOCK_EVENTS(%esi)        # reenable event callbacks
+       jz restore_all
        testb $_TIF_NEED_RESCHED, %cl
        jnz work_resched
 
@@ -345,8 +341,7 @@ work_notifysig:                             # deal with pending signals and
                                        # vm86-space
        xorl %edx, %edx
        call do_notify_resume
-       movl HYPERVISOR_shared_info,%esi
-       jmp restore_all_enable_events
+       jmp restore_all
 
        ALIGN
 work_notifysig_v86:
@@ -356,8 +351,7 @@ work_notifysig_v86:
        movl %eax, %esp
        xorl %edx, %edx
        call do_notify_resume
-       movl HYPERVISOR_shared_info,%esi
-       jmp restore_all_enable_events
+       jmp restore_all
 
        # perform syscall exit tracing
        ALIGN
@@ -374,11 +368,9 @@ syscall_trace_entry:
        # perform syscall exit tracing
        ALIGN
 syscall_exit_work:
-       movl HYPERVISOR_shared_info,%esi
-       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT), %cl
+       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
        jz work_pending
-       XEN_UNBLOCK_EVENTS(%esi)        # reenable event callbacks
-                                       # could let do_syscall_trace() call
+       XEN_UNBLOCK_EVENTS(%esi)        # could let do_syscall_trace() call
                                        # schedule() instead
        movl %esp, %eax
        movl $1, %edx
@@ -398,6 +390,44 @@ syscall_badsys:
        movl $-ENOSYS,EAX(%esp)
        jmp resume_userspace
 
+#if 0 /* XEN */
+/*
+ * Build the entry stubs and pointer table with
+ * some assembler magic.
+ */
+.data
+ENTRY(interrupt)
+.text
+
+vector=0
+ENTRY(irq_entries_start)
+.rept NR_IRQS
+       ALIGN
+1:     pushl $vector-256
+       jmp common_interrupt
+.data
+       .long 1b
+.text
+vector=vector+1
+.endr
+
+       ALIGN
+common_interrupt:
+       SAVE_ALL
+       call do_IRQ
+       jmp ret_from_intr
+
+#define BUILD_INTERRUPT(name, nr)      \
+ENTRY(name)                            \
+       pushl $nr-256;                  \
+       SAVE_ALL                        \
+       call smp_/**/name;      \
+       jmp ret_from_intr;
+
+/* The include is where all of the SMP etc. interrupts come from */
+#include "entry_arch.h"
+#endif /* XEN */
+
 ENTRY(divide_error)
        pushl $0                        # no error code
        pushl $do_divide_error
@@ -422,9 +452,12 @@ error_code:
        movl %esp, %edx
        pushl %esi                      # push the error code
        pushl %edx                      # push the pt_regs pointer
-       movl $(__KERNEL_DS), %edx       # XXXcl USER?
+       movl $(__USER_DS), %edx
        movl %edx, %ds
        movl %edx, %es
+       movl HYPERVISOR_shared_info, %esi
+       movb evtchn_upcall_mask(%esi), %dl
+       movb %dl, EVENT_MASK+8(%esp)
        call *%edi
        addl $8, %esp
        jmp ret_from_exception
@@ -442,24 +475,24 @@ error_code:
 # activation and restart the handler using the previous one.
 ENTRY(hypervisor_callback)
        pushl %eax
-       SAVE_ALL
-       GET_THREAD_INFO(%ebp)
+       SAVE_ALL_NO_EVENTMASK
        movl EIP(%esp),%eax
        cmpl $scrit,%eax
        jb   11f
        cmpl $ecrit,%eax
        jb   critical_region_fixup
-11:    push %esp
+11:    movl HYPERVISOR_shared_info, %esi
+       movb $0, EVENT_MASK(%esp)
+       push %esp
        call evtchn_do_upcall
        add  $4,%esp
-       movl HYPERVISOR_shared_info,%esi
-       movb CS(%esp),%cl
-       test $2,%cl                     # slow return to ring 2 or 3
-       jne  ret_syscall_tests
+       jmp  ret_from_intr
+
+        ALIGN
 restore_all_enable_events:  
-safesti:XEN_UNBLOCK_EVENTS(%esi)       # reenable event callbacks
+       XEN_UNBLOCK_EVENTS(%esi)
 scrit: /**** START OF CRITICAL REGION ****/
-       testb $1,evtchn_upcall_pending(%esi)
+       XEN_TEST_PENDING(%esi)
        jnz  14f                        # process more events if necessary...
        RESTORE_ALL
 14:    XEN_BLOCK_EVENTS(%esi)
@@ -583,11 +616,18 @@ ENTRY(debug)
        jne debug_stack_correct
        FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
 debug_stack_correct:
-       pushl $0
-       pushl $do_debug
-       jmp error_code
+       pushl $-1                       # mark this as an int
+       SAVE_ALL
+       movl %esp,%edx
+       pushl $0
+       pushl %edx
+       call do_debug
+       addl $8,%esp
+       testl %eax,%eax
+       jnz restore_all
+       jmp ret_from_exception
 
-#if 0
+#if 0 /* XEN */
 /*
  * NMI is doubly nasty. It can happen _while_ we're handling
  * a debug fault, and the debug fault hasn't yet been able to
@@ -633,12 +673,19 @@ nmi_debug_stack_check:
 nmi_debug_stack_fixup:
        FIX_STACK(24,nmi_stack_correct, 1)
        jmp nmi_stack_correct
-#endif
+#endif /* XEN */
 
 ENTRY(int3)
+       pushl $-1                       # mark this as an int
+       SAVE_ALL
+       movl %esp,%edx
        pushl $0
-       pushl $do_int3
-       jmp error_code
+       pushl %edx
+       call do_int3
+       addl $8,%esp
+       testl %eax,%eax
+       jnz restore_all
+       jmp ret_from_exception
 
 ENTRY(overflow)
        pushl $0
@@ -660,10 +707,6 @@ ENTRY(coprocessor_segment_overrun)
        pushl $do_coprocessor_segment_overrun
        jmp error_code
 
-ENTRY(double_fault)
-       pushl $do_double_fault
-       jmp error_code
-
 ENTRY(invalid_TSS)
        pushl $do_invalid_TSS
        jmp error_code
@@ -686,36 +729,37 @@ ENTRY(alignment_check)
 
 # This handler is special, because it gets an extra value on its stack,
 # which is the linear faulting address.
-#define PAGE_FAULT_STUB(_name1, _name2)                                          \
-ENTRY(_name1)                                                            \
-       pushl %ds                                                       ; \
-       pushl %eax                                                      ; \
-       xorl %eax,%eax                                                  ; \
-       pushl %ebp                                                      ; \
-       pushl %edi                                                      ; \
-       pushl %esi                                                      ; \
-       pushl %edx                                                      ; \
-       decl %eax                       /* eax = -1 */                  ; \
-       pushl %ecx                                                      ; \
-       pushl %ebx                                                      ; \
-       GET_THREAD_INFO(%ebp)                                           ; \
-       cld                                                             ; \
-       movl %es,%ecx                                                   ; \
-       movl ORIG_EAX(%esp), %esi       /* get the error code */        ; \
-       movl ES(%esp), %edi             /* get the faulting address */  ; \
-       movl %eax, ORIG_EAX(%esp)                                       ; \
-       movl %ecx, ES(%esp)                                             ; \
-       movl %esp,%edx                                                  ; \
-       pushl %edi                      /* push the faulting address */ ; \
-       pushl %esi                      /* push the error code */       ; \
-       pushl %edx                      /* push the pt_regs pointer */  ; \
-       movl $(__KERNEL_DS),%edx                                        ; \
-       movl %edx,%ds                                                   ; \
-       movl %edx,%es                                                   ; \
-       call _name2                                                     ; \
-       addl $12,%esp                                                   ; \
-       jmp ret_from_exception                                          ;
-PAGE_FAULT_STUB(page_fault, do_page_fault)
+ENTRY(page_fault)
+       pushl %ds
+       pushl %eax
+       xorl %eax,%eax
+       pushl %ebp
+       pushl %edi
+       pushl %esi
+       pushl %edx
+       decl %eax                       /* eax = -1 */
+       pushl %ecx
+       pushl %ebx
+       GET_THREAD_INFO(%ebp)
+       cld
+       movl %es,%ecx
+       movl ORIG_EAX(%esp), %esi       /* get the error code */
+       movl ES(%esp), %edi             /* get the faulting address */
+       movl %eax, ORIG_EAX(%esp)
+       movl %ecx, ES(%esp)
+       movl %esp,%edx
+       pushl %edi                      /* push the faulting address */
+       pushl %esi                      /* push the error code */
+       pushl %edx                      /* push the pt_regs pointer */
+       movl $(__KERNEL_DS),%edx
+       movl %edx,%ds
+       movl %edx,%es
+       movl HYPERVISOR_shared_info, %esi
+       movb evtchn_upcall_mask(%esi), %dl
+       movb %dl, EVENT_MASK+12(%esp)
+       call do_page_fault
+       addl $12,%esp
+       jmp ret_from_exception
 
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
@@ -1014,5 +1058,6 @@ ENTRY(sys_call_table)
        .long sys_mq_notify
        .long sys_mq_getsetattr
        .long sys_ni_syscall            /* reserved for kexec */
+       .long sys_waitid
 
 syscall_table_size=(.-sys_call_table)
index 9b2fa628e1d51bbd755ba4fc689a4daf888af181..7c009260537a469d51223feeb8411caa9bad63b5 100644 (file)
@@ -78,7 +78,6 @@ asmlinkage void overflow(void);
 asmlinkage void bounds(void);
 asmlinkage void invalid_op(void);
 asmlinkage void device_not_available(void);
-asmlinkage void double_fault(void);
 asmlinkage void coprocessor_segment_overrun(void);
 asmlinkage void invalid_TSS(void);
 asmlinkage void segment_not_present(void);
@@ -470,7 +469,6 @@ DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
 DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
 DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
 DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
-DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
 DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
 DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
 DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
@@ -1032,7 +1030,6 @@ static trap_info_t trap_table[] = {
        {  5, 3, __KERNEL_CS, (unsigned long)bounds                     },
        {  6, 0, __KERNEL_CS, (unsigned long)invalid_op                 },
        {  7, 0, __KERNEL_CS, (unsigned long)device_not_available       },
-       {  8, 0, __KERNEL_CS, (unsigned long)double_fault               },
        {  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
        { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS                },
        { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present        },